From d0a2f225e3296b21b963ffd835e81886c7e86184 Mon Sep 17 00:00:00 2001
From: Alex Williamson
Date: Wed, 7 Nov 2007 11:01:23 -0700
Subject: [PATCH] [IA64] Simplify thash_purge_and_insert()

This patch simplifies thash_purge_and_insert() for readability.
PV domains never use this function.

Signed-off-by: Kouya Shimura
---
 xen/arch/ia64/vmx/vtlb.c    | 73 +++++++++++++------------------------
 xen/include/asm-ia64/vmmu.h |  1 -
 2 files changed, 25 insertions(+), 49 deletions(-)

diff --git a/xen/arch/ia64/vmx/vtlb.c b/xen/arch/ia64/vmx/vtlb.c
index c2bc9b7f82..3d9dade678 100644
--- a/xen/arch/ia64/vmx/vtlb.c
+++ b/xen/arch/ia64/vmx/vtlb.c
@@ -413,7 +413,7 @@ static thash_data_t *__alloc_chain(thash_cb_t *hcb)
  * 3: The caller need to make sure the new entry will not overlap
  *    with any existed entry.
  */
-void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
+static void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
 {
     thash_data_t *hash_table, *cch;
     /* int flag; */
@@ -422,6 +422,8 @@
     u64 tag, len;
     thash_cb_t *hcb = &v->arch.vtlb;
 
+    vcpu_quick_region_set(PSCBX(v, tc_regions), va);
+
     vcpu_get_rr(v, va, &vrr.rrval);
     vrr.ps = itir_ps(itir);
     VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
@@ -545,60 +547,35 @@ u64 translate_phy_pte(VCPU *v, u64 *pte, u64 itir, u64 va)
  */
 int thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
 {
-    u64 ps;//, va;
-    u64 phy_pte;
+    u64 ps, phy_pte, psr;
     ia64_rr mrr;
-    int ret = 0;
 
     ps = itir_ps(itir);
     mrr.rrval = ia64_get_rr(ifa);
-    if (VMX_DOMAIN(v)) {
-        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
-
-        if (pte & VTLB_PTE_IO)
-            ret = 1;
-        vtlb_purge(v, ifa, ps);
-        vhpt_purge(v, ifa, ps);
-        if (ps == mrr.ps) {
-            if (!(pte & VTLB_PTE_IO)) {
-                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
-            }
-            else{
-                vtlb_insert(v, pte, itir, ifa);
-                vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
-            }
-        }
-        else if (ps > mrr.ps) {
-            vtlb_insert(v, pte, itir, ifa);
-            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
-            if (!(pte & VTLB_PTE_IO)) {
-                vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
-            }
-        }
-        else {
-            u64 psr;
-
-            vtlb_insert(v, pte, itir, ifa);
-            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
-            if (!(pte & VTLB_PTE_IO)) {
-                phy_pte &= ~PAGE_FLAGS_RV_MASK;
-                psr = ia64_clear_ic();
-                ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
-                ia64_set_psr(psr);
-                ia64_srlz_i();
-            }
-        }
+
+    phy_pte = translate_phy_pte(v, &pte, itir, ifa);
+
+    vtlb_purge(v, ifa, ps);
+    vhpt_purge(v, ifa, ps);
+
+    if (pte & VTLB_PTE_IO) {
+        vtlb_insert(v, pte, itir, ifa);
+        return 1;
     }
-    else{
-        phy_pte = translate_phy_pte(v, &pte, itir, ifa);
-        if (ps != PAGE_SHIFT) {
-            vtlb_insert(v, pte, itir, ifa);
-            vcpu_quick_region_set(PSCBX(v, tc_regions), ifa);
-        }
-        machine_tlb_purge(ifa, ps);
+
+    if (ps != mrr.ps)
+        vtlb_insert(v, pte, itir, ifa);
+
+    if (ps >= mrr.ps) {
         vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
+    } else { /* Subpaging */
+        phy_pte &= ~PAGE_FLAGS_RV_MASK;
+        psr = ia64_clear_ic();
+        ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
+        ia64_set_psr(psr);
+        ia64_srlz_i();
     }
-    return ret;
+    return 0;
 }
 
 /*
diff --git a/xen/include/asm-ia64/vmmu.h b/xen/include/asm-ia64/vmmu.h
index 89e32e1e08..d9d535b3f1 100644
--- a/xen/include/asm-ia64/vmmu.h
+++ b/xen/include/asm-ia64/vmmu.h
@@ -216,7 +216,6 @@ extern void machine_tlb_purge(u64 va, u64 ps);
 extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
 extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma);
 extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
-extern void vtlb_insert(struct vcpu *vcpu, u64 pte, u64 itir, u64 va);
 extern u64 translate_phy_pte(struct vcpu *v, u64 *pte, u64 itir, u64 va);
 extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
                               int type);
-- 
2.30.2